The sync_lazy_execstate functions are now all arch-specific; we may want
to fine-tune their behaviour later.
Signed-off-by: Keir Fraser <keir@xensource.com>
/* calls in xen/common code that are unused on ia64 */
+void sync_lazy_execstate_cpuset(unsigned long cpuset) {} /* no-op: lazy execstate switching is unused on ia64 (see note above) */
+void sync_lazy_execstate_all(void) {} /* no-op for the same reason */
+
/*
 * Stub: grant tables are unused on ia64 (see note above), so creation
 * trivially succeeds.  Returns 0 to signal success to xen/common callers.
 */
int grant_table_create(struct domain *d)
{
    return 0;
}
/*
 * Stub: grant tables are unused on ia64 (see note above), so there is
 * nothing to tear down.  The function is declared void, so it must not
 * return a value (the previous `return 1;` was a C constraint violation,
 * C11 6.8.6.4, and is rejected by clang).
 */
void grant_table_destroy(struct domain *d)
{
}
+void sync_lazy_execstate_cpuset(unsigned long cpuset)
+{
+ flush_tlb_mask(cpuset); /* TLB flush forces the named CPUs out of lazy state -- presumably an IPI-driven flush; confirm flush_tlb_mask semantics */
+}
+
+void sync_lazy_execstate_all(void)
+{
+ flush_tlb_all(); /* system-wide variant of the cpuset case above -- presumably flushes every CPU; confirm flush_tlb_all scope */
+}
+
unsigned long __hypercall_create_continuation(
unsigned int op, unsigned int nr_args, ...)
{
* Force loading of currently-executing domain state on the specified set
* of CPUs. This is used to counteract lazy state switching where required.
*/
-#define sync_lazy_execstate_cpuset(_cpuset) flush_tlb_mask(_cpuset)
-#define sync_lazy_execstate_all() flush_tlb_all()
+extern void sync_lazy_execstate_cpuset(unsigned long cpuset);
+extern void sync_lazy_execstate_all(void);
extern int __sync_lazy_execstate(void);
extern void context_switch(